import numpy as np
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import pathlib
2024-11-26 12:54:34.073423: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
import os, shutil, pathlib
original_dir = pathlib.Path("../data/kaggle_dogs_vs_cats/train")
new_base_dir = pathlib.Path("../data/kaggle_dogs_vs_cats_small")
def make_subset(subset_name, start_index, end_index):
for category in ("cat", "dog"):
dir = new_base_dir / subset_name / category
os.makedirs(dir)
fnames = [f"{category}.{i}.jpg" for i in range(start_index, end_index)]
for fname in fnames:
shutil.copyfile(src=original_dir / fname,
dst=dir / fname)
make_subset("train", start_index=0, end_index=1000)
make_subset("validation", start_index=1000, end_index=1500)
make_subset("test", start_index=1500, end_index=2500)
# This should point to the small dataset of the Kaggle Dogs vs Cats competition that was created in a previous notebook
data_folder = pathlib.Path('../data/kaggle_dogs_vs_cats_small')
from tensorflow.keras.utils import image_dataset_from_directory
train_dataset = image_dataset_from_directory(
data_folder / "train",
image_size=(180, 180),
batch_size=32)
validation_dataset = image_dataset_from_directory(
data_folder / "validation",
image_size=(180, 180),
batch_size=32)
test_dataset = image_dataset_from_directory(
data_folder / "test",
image_size=(180, 180),
batch_size=32)
Found 2000 files belonging to 2 classes. Found 1000 files belonging to 2 classes. Found 2000 files belonging to 2 classes.
import matplotlib.pyplot as plt
import numpy as np
# base info
print("Dataset Info:")
print(f"Train dataset size: {len(train_dataset)}")
print(f"Validation dataset size: {len(validation_dataset)}")
print(f"Test dataset size: {len(test_dataset)}")
# class names
class_names = train_dataset.class_names
print("\nClass names:", class_names)
# image counts
def count_images_per_class(dataset):
counts = {name: 0 for name in class_names}
for images, labels in dataset:
for label in labels:
counts[class_names[label.numpy()]] += 1
return counts
train_counts = count_images_per_class(train_dataset)
print("\nImages per class in training set:", train_counts)
# check some images
plt.figure(figsize=(10, 10))
for images, labels in train_dataset.take(1):
for i in range(9):
plt.subplot(3, 3, i + 1)
plt.imshow(images[i].numpy().astype("uint8"))
plt.title(class_names[labels[i]])
plt.axis("off")
plt.show()
# image statistics
def analyze_image_statistics(dataset):
pixel_mean = []
pixel_std = []
for images, _ in dataset:
pixel_mean.append(tf.reduce_mean(images))
pixel_std.append(tf.math.reduce_std(images))
return {
'mean': tf.reduce_mean(pixel_mean),
'std': tf.reduce_mean(pixel_std)
}
stats = analyze_image_statistics(train_dataset)
print("\nImage Statistics:")
print(f"Mean pixel value: {stats['mean']:.2f}")
print(f"Std pixel value: {stats['std']:.2f}")
# show images
plt.figure(figsize=(10, 5))
plt.bar(class_names, list(train_counts.values()))
plt.title('Number of Images per Class')
plt.ylabel('Count')
plt.show()
# check batch shape
for image_batch, labels_batch in train_dataset.take(1):
print("\nImage batch shape:", image_batch.shape)
print("Labels batch shape:", labels_batch.shape)
Dataset Info:
Train dataset size: 63
Validation dataset size: 32
Test dataset size: 63
Class names: ['cat', 'dog']
Images per class in training set: {'cat': 1000, 'dog': 1000}
2024-11-23 12:20:16.308261: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'Placeholder/_4' with dtype int32 and shape [2000]
[[{{node Placeholder/_4}}]]
2024-11-23 12:20:16.308825: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'Placeholder/_0' with dtype string and shape [2000]
[[{{node Placeholder/_0}}]]
Image Statistics: Mean pixel value: 113.95 Std pixel value: 65.51
2024-11-23 12:20:20.350424: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'Placeholder/_4' with dtype int32 and shape [2000]
[[{{node Placeholder/_4}}]]
2024-11-23 12:20:20.351295: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'Placeholder/_4' with dtype int32 and shape [2000]
[[{{node Placeholder/_4}}]]
Image batch shape: (32, 180, 180, 3) Labels batch shape: (32,)
1000 each for cats/dogs, prefect balanced data, efficient batch size of 32, and consistent 180x180 RGB images providing rich color information for feature extraction.
from tensorflow.keras import layers, models
inputs = keras.Input(shape=(180, 180, 3))
x = layers.Rescaling(1./255)(inputs)
x = layers.Conv2D(filters=32, kernel_size=3, activation="relu")(x)
x = layers.MaxPooling2D(pool_size=2)(x)
x = layers.Conv2D(filters=64, kernel_size=3, activation="relu")(x)
x = layers.MaxPooling2D(pool_size=2)(x)
x = layers.Conv2D(filters=128, kernel_size=3, activation="relu")(x)
x = layers.MaxPooling2D(pool_size=2)(x)
x = layers.Conv2D(filters=256, kernel_size=3, activation="relu")(x)
x = layers.MaxPooling2D(pool_size=2)(x)
x = layers.Conv2D(filters=256, kernel_size=3, activation="relu")(x)
x = layers.Flatten()(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs=inputs, outputs=outputs)
model.summary()
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_1 (InputLayer) [(None, 180, 180, 3)] 0
rescaling_1 (Rescaling) (None, 180, 180, 3) 0
conv2d_3 (Conv2D) (None, 178, 178, 32) 896
max_pooling2d_3 (MaxPooling (None, 89, 89, 32) 0
2D)
conv2d_4 (Conv2D) (None, 87, 87, 64) 18496
max_pooling2d_4 (MaxPooling (None, 43, 43, 64) 0
2D)
conv2d_5 (Conv2D) (None, 41, 41, 128) 73856
max_pooling2d_5 (MaxPooling (None, 20, 20, 128) 0
2D)
conv2d_6 (Conv2D) (None, 18, 18, 256) 295168
max_pooling2d_6 (MaxPooling (None, 9, 9, 256) 0
2D)
conv2d_7 (Conv2D) (None, 7, 7, 256) 590080
flatten_1 (Flatten) (None, 12544) 0
dense_2 (Dense) (None, 1) 12545
=================================================================
Total params: 991,041
Trainable params: 991,041
Non-trainable params: 0
_________________________________________________________________
model.compile(loss="binary_crossentropy",
optimizer="rmsprop",
metrics=["accuracy"])
callbacks = [
keras.callbacks.ModelCheckpoint(
filepath="./models/convnet_from_scratch_1123.keras",
save_best_only=True,
monitor="val_loss")
]
history = model.fit(
train_dataset,
epochs=30,
validation_data=validation_dataset,
callbacks=callbacks)
Epoch 1/30 63/63 [==============================] - 63s 968ms/step - loss: 0.7014 - accuracy: 0.5150 - val_loss: 0.6923 - val_accuracy: 0.5000 Epoch 2/30 63/63 [==============================] - 65s 1s/step - loss: 0.6924 - accuracy: 0.5345 - val_loss: 0.6905 - val_accuracy: 0.5030 Epoch 3/30 63/63 [==============================] - 58s 923ms/step - loss: 0.6814 - accuracy: 0.5595 - val_loss: 0.6537 - val_accuracy: 0.6100 Epoch 4/30 63/63 [==============================] - 61s 972ms/step - loss: 0.6475 - accuracy: 0.6210 - val_loss: 0.6365 - val_accuracy: 0.6600 Epoch 5/30 63/63 [==============================] - 64s 1s/step - loss: 0.6184 - accuracy: 0.6715 - val_loss: 0.6036 - val_accuracy: 0.6630 Epoch 6/30 63/63 [==============================] - 60s 949ms/step - loss: 0.6016 - accuracy: 0.6940 - val_loss: 0.5767 - val_accuracy: 0.6990 Epoch 7/30 63/63 [==============================] - 63s 1s/step - loss: 0.5344 - accuracy: 0.7365 - val_loss: 0.7745 - val_accuracy: 0.6030 Epoch 8/30 63/63 [==============================] - 58s 923ms/step - loss: 0.4992 - accuracy: 0.7680 - val_loss: 0.5449 - val_accuracy: 0.7320 Epoch 9/30 63/63 [==============================] - 64s 1s/step - loss: 0.4389 - accuracy: 0.7905 - val_loss: 0.6150 - val_accuracy: 0.7140 Epoch 10/30 63/63 [==============================] - 60s 957ms/step - loss: 0.3980 - accuracy: 0.8205 - val_loss: 0.5864 - val_accuracy: 0.7070 Epoch 11/30 63/63 [==============================] - 62s 978ms/step - loss: 0.3325 - accuracy: 0.8555 - val_loss: 0.5900 - val_accuracy: 0.7390 Epoch 12/30 63/63 [==============================] - 58s 919ms/step - loss: 0.2809 - accuracy: 0.8835 - val_loss: 0.7038 - val_accuracy: 0.7270 Epoch 13/30 63/63 [==============================] - 59s 929ms/step - loss: 0.2185 - accuracy: 0.9125 - val_loss: 0.6828 - val_accuracy: 0.7240 Epoch 14/30 63/63 [==============================] - 58s 925ms/step - loss: 0.1831 - accuracy: 0.9245 - val_loss: 0.8191 - val_accuracy: 0.7390 Epoch 15/30 63/63 [==============================] - 61s 966ms/step - loss: 0.1306 - accuracy: 0.9445 - val_loss: 0.8532 - val_accuracy: 0.7390 Epoch 16/30 63/63 [==============================] - 63s 997ms/step - loss: 0.0883 - accuracy: 0.9685 - val_loss: 0.9935 - val_accuracy: 0.7250 Epoch 17/30 63/63 [==============================] - 63s 1000ms/step - loss: 0.0871 - accuracy: 0.9680 - val_loss: 1.0959 - val_accuracy: 0.7280 Epoch 18/30 63/63 [==============================] - 60s 948ms/step - loss: 0.0672 - accuracy: 0.9735 - val_loss: 1.2993 - val_accuracy: 0.7420 Epoch 19/30 63/63 [==============================] - 58s 919ms/step - loss: 0.0604 - accuracy: 0.9795 - val_loss: 1.3414 - val_accuracy: 0.7340 Epoch 20/30 63/63 [==============================] - 61s 963ms/step - loss: 0.0780 - accuracy: 0.9715 - val_loss: 1.5294 - val_accuracy: 0.7060 Epoch 21/30 63/63 [==============================] - 62s 980ms/step - loss: 0.0542 - accuracy: 0.9860 - val_loss: 1.4657 - val_accuracy: 0.7490 Epoch 22/30 63/63 [==============================] - 58s 926ms/step - loss: 0.0595 - accuracy: 0.9815 - val_loss: 1.6057 - val_accuracy: 0.7120 Epoch 23/30 63/63 [==============================] - 57s 901ms/step - loss: 0.0598 - accuracy: 0.9830 - val_loss: 1.7849 - val_accuracy: 0.7210 Epoch 24/30 63/63 [==============================] - 54s 855ms/step - loss: 0.0472 - accuracy: 0.9875 - val_loss: 1.9420 - val_accuracy: 0.7340 Epoch 25/30 63/63 [==============================] - 60s 945ms/step - loss: 0.0416 - accuracy: 0.9880 - val_loss: 2.0887 - val_accuracy: 0.7100 Epoch 26/30 63/63 [==============================] - 60s 949ms/step - loss: 0.0393 - accuracy: 0.9880 - val_loss: 1.8387 - val_accuracy: 0.7260 Epoch 27/30 63/63 [==============================] - 57s 908ms/step - loss: 0.0294 - accuracy: 0.9910 - val_loss: 2.0594 - val_accuracy: 0.7160 Epoch 28/30 63/63 [==============================] - 56s 891ms/step - loss: 0.0363 - accuracy: 0.9865 - val_loss: 2.0581 - val_accuracy: 0.7200 Epoch 29/30 63/63 [==============================] - 59s 932ms/step - loss: 0.0541 - accuracy: 0.9850 - val_loss: 2.0585 - val_accuracy: 0.7230 Epoch 30/30 63/63 [==============================] - 60s 943ms/step - loss: 0.0328 - accuracy: 0.9935 - val_loss: 2.1474 - val_accuracy: 0.7200
Displaying curves of loss and accuracy during training
accuracy = history.history["accuracy"]
val_accuracy = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(accuracy) + 1)
plt.plot(epochs, accuracy, "bo", label="Training accuracy")
plt.plot(epochs, val_accuracy, "b", label="Validation accuracy")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
Overfitting is evident, around epoch 6.
Evaluating the model on the test set
test_model = keras.models.load_model("./models/convnet_from_scratch_1123.keras")
test_loss, test_acc = test_model.evaluate(test_dataset)
print(f"Test accuracy: {test_acc:.3f}")
2024-11-23 22:42:24.661355: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'Placeholder/_4' with dtype int32 and shape [2000]
[[{{node Placeholder/_4}}]]
2024-11-23 22:42:24.661925: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'Placeholder/_4' with dtype int32 and shape [2000]
[[{{node Placeholder/_4}}]]
63/63 [==============================] - 16s 241ms/step - loss: 0.5654 - accuracy: 0.7130 Test accuracy: 0.713
Model(convnet_from_scratch_1123): test accuracy is 0.713
Instantiating the VGG16 convolutional base
conv_base = keras.applications.vgg16.VGG16(
weights="imagenet",
include_top=False,
input_shape=(180, 180, 3))
conv_base.summary()
Model: "vgg16"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_3 (InputLayer) [(None, 180, 180, 3)] 0
block1_conv1 (Conv2D) (None, 180, 180, 64) 1792
block1_conv2 (Conv2D) (None, 180, 180, 64) 36928
block1_pool (MaxPooling2D) (None, 90, 90, 64) 0
block2_conv1 (Conv2D) (None, 90, 90, 128) 73856
block2_conv2 (Conv2D) (None, 90, 90, 128) 147584
block2_pool (MaxPooling2D) (None, 45, 45, 128) 0
block3_conv1 (Conv2D) (None, 45, 45, 256) 295168
block3_conv2 (Conv2D) (None, 45, 45, 256) 590080
block3_conv3 (Conv2D) (None, 45, 45, 256) 590080
block3_pool (MaxPooling2D) (None, 22, 22, 256) 0
block4_conv1 (Conv2D) (None, 22, 22, 512) 1180160
block4_conv2 (Conv2D) (None, 22, 22, 512) 2359808
block4_conv3 (Conv2D) (None, 22, 22, 512) 2359808
block4_pool (MaxPooling2D) (None, 11, 11, 512) 0
block5_conv1 (Conv2D) (None, 11, 11, 512) 2359808
block5_conv2 (Conv2D) (None, 11, 11, 512) 2359808
block5_conv3 (Conv2D) (None, 11, 11, 512) 2359808
block5_pool (MaxPooling2D) (None, 5, 5, 512) 0
=================================================================
Total params: 14,714,688
Trainable params: 14,714,688
Non-trainable params: 0
_________________________________________________________________
Extracting the VGG16 features and corresponding labels
import numpy as np
def get_features_and_labels(dataset):
all_features = []
all_labels = []
for images, labels in dataset:
preprocessed_images = keras.applications.vgg16.preprocess_input(images)
features = conv_base.predict(preprocessed_images)
all_features.append(features)
all_labels.append(labels)
return np.concatenate(all_features), np.concatenate(all_labels)
train_features, train_labels = get_features_and_labels(train_dataset)
val_features, val_labels = get_features_and_labels(validation_dataset)
test_features, test_labels = get_features_and_labels(test_dataset)
1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 2s 2s/step
2024-11-26 13:01:54.832562: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'Placeholder/_4' with dtype int32 and shape [1000]
[[{{node Placeholder/_4}}]]
2024-11-26 13:01:54.833020: I tensorflow/core/common_runtime/executor.cc:1197] [/device:CPU:0] (DEBUG INFO) Executor start aborting (this does not indicate an error and you can ignore this message): INVALID_ARGUMENT: You must feed a value for placeholder tensor 'Placeholder/_4' with dtype int32 and shape [1000]
[[{{node Placeholder/_4}}]]
1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 5s 5s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 1s 808ms/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 20s 20s/step 1/1 [==============================] - 9s 9s/step 1/1 [==============================] - 5s 5s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 2s 2s/step
train_features.shape
(2000, 5, 5, 512)
Defining and training the densely connected classifier
Here we define a new dense neural networks, of only two (neuronal/trainable) layers.
inputs = keras.Input(shape=(5, 5, 512))
x = layers.Flatten()(inputs)
x = layers.Dense(256)(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.summary()
Model: "model_2"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_4 (InputLayer) [(None, 5, 5, 512)] 0
flatten_3 (Flatten) (None, 12800) 0
dense_4 (Dense) (None, 256) 3277056
dropout_1 (Dropout) (None, 256) 0
dense_5 (Dense) (None, 1) 257
=================================================================
Total params: 3,277,313
Trainable params: 3,277,313
Non-trainable params: 0
_________________________________________________________________
model.compile(loss="binary_crossentropy",
optimizer="rmsprop",
metrics=["accuracy"])
callbacks = [
keras.callbacks.ModelCheckpoint(
filepath="./models/feature_extraction_1123.keras",
save_best_only=True,
monitor="val_loss")
]
history = model.fit(
train_features, train_labels,
epochs=20,
validation_data=(val_features, val_labels),
callbacks=callbacks)
Epoch 1/20 63/63 [==============================] - 3s 44ms/step - loss: 20.4105 - accuracy: 0.9185 - val_loss: 4.8456 - val_accuracy: 0.9670 Epoch 2/20 63/63 [==============================] - 3s 41ms/step - loss: 2.9027 - accuracy: 0.9780 - val_loss: 5.5260 - val_accuracy: 0.9630 Epoch 3/20 63/63 [==============================] - 2s 38ms/step - loss: 2.3831 - accuracy: 0.9810 - val_loss: 4.1589 - val_accuracy: 0.9700 Epoch 4/20 63/63 [==============================] - 2s 36ms/step - loss: 0.4333 - accuracy: 0.9950 - val_loss: 7.4632 - val_accuracy: 0.9680 Epoch 5/20 63/63 [==============================] - 2s 38ms/step - loss: 0.3776 - accuracy: 0.9965 - val_loss: 5.4601 - val_accuracy: 0.9760 Epoch 6/20 63/63 [==============================] - 2s 38ms/step - loss: 1.2773 - accuracy: 0.9925 - val_loss: 4.7615 - val_accuracy: 0.9760 Epoch 7/20 63/63 [==============================] - 2s 37ms/step - loss: 0.5056 - accuracy: 0.9935 - val_loss: 6.7920 - val_accuracy: 0.9690 Epoch 8/20 63/63 [==============================] - 2s 38ms/step - loss: 0.7286 - accuracy: 0.9940 - val_loss: 6.3005 - val_accuracy: 0.9720 Epoch 9/20 63/63 [==============================] - 2s 39ms/step - loss: 0.2629 - accuracy: 0.9970 - val_loss: 6.9170 - val_accuracy: 0.9720 Epoch 10/20 63/63 [==============================] - 3s 43ms/step - loss: 0.2905 - accuracy: 0.9975 - val_loss: 7.0202 - val_accuracy: 0.9680 Epoch 11/20 63/63 [==============================] - 3s 48ms/step - loss: 0.4237 - accuracy: 0.9955 - val_loss: 6.9332 - val_accuracy: 0.9750 Epoch 12/20 63/63 [==============================] - 2s 39ms/step - loss: 0.0419 - accuracy: 0.9985 - val_loss: 7.3697 - val_accuracy: 0.9710 Epoch 13/20 63/63 [==============================] - 4s 57ms/step - loss: 0.1354 - accuracy: 0.9985 - val_loss: 6.4407 - val_accuracy: 0.9740 Epoch 14/20 63/63 [==============================] - 3s 44ms/step - loss: 0.1871 - accuracy: 0.9975 - val_loss: 10.0386 - val_accuracy: 0.9660 Epoch 15/20 63/63 [==============================] - 3s 41ms/step - loss: 0.1973 - accuracy: 0.9975 - val_loss: 6.1241 - val_accuracy: 0.9780 Epoch 16/20 63/63 [==============================] - 3s 44ms/step - loss: 0.1893 - accuracy: 0.9975 - val_loss: 5.5385 - val_accuracy: 0.9800 Epoch 17/20 63/63 [==============================] - 3s 46ms/step - loss: 0.1283 - accuracy: 0.9990 - val_loss: 5.6345 - val_accuracy: 0.9800 Epoch 18/20 63/63 [==============================] - 2s 39ms/step - loss: 0.0632 - accuracy: 0.9990 - val_loss: 5.3476 - val_accuracy: 0.9810 Epoch 19/20 63/63 [==============================] - 3s 40ms/step - loss: 0.0790 - accuracy: 0.9990 - val_loss: 5.1008 - val_accuracy: 0.9810 Epoch 20/20 63/63 [==============================] - 2s 38ms/step - loss: 0.2394 - accuracy: 0.9990 - val_loss: 4.8205 - val_accuracy: 0.9820
Plotting the results
import matplotlib.pyplot as plt
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, "bo", label="Training accuracy")
plt.plot(epochs, val_acc, "b", label="Validation accuracy")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
evaluate in test dataset
test_model = keras.models.load_model(
"./models/feature_extraction_1123.keras")
test_loss, test_acc = test_model.evaluate(x=test_features, y=test_labels)
print(f"Test accuracy: {test_acc:.3f}")
63/63 [==============================] - 1s 6ms/step - loss: 4.9291 - accuracy: 0.9715 Test accuracy: 0.971
Instantiating and freezing the VGG16 convolutional base
conv_base = keras.applications.vgg16.VGG16(
weights="imagenet",
include_top=False)
conv_base.summary()
Model: "vgg16"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, None, None, 3)] 0
block1_conv1 (Conv2D) (None, None, None, 64) 1792
block1_conv2 (Conv2D) (None, None, None, 64) 36928
block1_pool (MaxPooling2D) (None, None, None, 64) 0
block2_conv1 (Conv2D) (None, None, None, 128) 73856
block2_conv2 (Conv2D) (None, None, None, 128) 147584
block2_pool (MaxPooling2D) (None, None, None, 128) 0
block3_conv1 (Conv2D) (None, None, None, 256) 295168
block3_conv2 (Conv2D) (None, None, None, 256) 590080
block3_conv3 (Conv2D) (None, None, None, 256) 590080
block3_pool (MaxPooling2D) (None, None, None, 256) 0
block4_conv1 (Conv2D) (None, None, None, 512) 1180160
block4_conv2 (Conv2D) (None, None, None, 512) 2359808
block4_conv3 (Conv2D) (None, None, None, 512) 2359808
block4_pool (MaxPooling2D) (None, None, None, 512) 0
block5_conv1 (Conv2D) (None, None, None, 512) 2359808
block5_conv2 (Conv2D) (None, None, None, 512) 2359808
block5_conv3 (Conv2D) (None, None, None, 512) 2359808
block5_pool (MaxPooling2D) (None, None, None, 512) 0
=================================================================
Total params: 14,714,688
Trainable params: 14,714,688
Non-trainable params: 0
_________________________________________________________________
The number of trainable weights after freezing the conv base:
conv_base.trainable = False
conv_base.summary()
Model: "vgg16"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_2 (InputLayer) [(None, None, None, 3)] 0
block1_conv1 (Conv2D) (None, None, None, 64) 1792
block1_conv2 (Conv2D) (None, None, None, 64) 36928
block1_pool (MaxPooling2D) (None, None, None, 64) 0
block2_conv1 (Conv2D) (None, None, None, 128) 73856
block2_conv2 (Conv2D) (None, None, None, 128) 147584
block2_pool (MaxPooling2D) (None, None, None, 128) 0
block3_conv1 (Conv2D) (None, None, None, 256) 295168
block3_conv2 (Conv2D) (None, None, None, 256) 590080
block3_conv3 (Conv2D) (None, None, None, 256) 590080
block3_pool (MaxPooling2D) (None, None, None, 256) 0
block4_conv1 (Conv2D) (None, None, None, 512) 1180160
block4_conv2 (Conv2D) (None, None, None, 512) 2359808
block4_conv3 (Conv2D) (None, None, None, 512) 2359808
block4_pool (MaxPooling2D) (None, None, None, 512) 0
block5_conv1 (Conv2D) (None, None, None, 512) 2359808
block5_conv2 (Conv2D) (None, None, None, 512) 2359808
block5_conv3 (Conv2D) (None, None, None, 512) 2359808
block5_pool (MaxPooling2D) (None, None, None, 512) 0
=================================================================
Total params: 14,714,688
Trainable params: 0
Non-trainable params: 14,714,688
_________________________________________________________________
Adding a data augmentation stage and a classifier to the convolutional base
data_augmentation = keras.Sequential(
[
layers.RandomFlip("horizontal"),
layers.RandomRotation(0.1),
layers.RandomZoom(0.2),
]
)
inputs = keras.Input(shape=(180, 180, 3))
x = data_augmentation(inputs)
x = keras.applications.vgg16.preprocess_input(x)
x = conv_base(x)
x = layers.Flatten()(x)
x = layers.Dense(256)(x)
x = layers.Dropout(0.5)(x)
outputs = layers.Dense(1, activation="sigmoid")(x)
model = keras.Model(inputs, outputs)
model.summary()
Model: "model"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_3 (InputLayer) [(None, 180, 180, 3)] 0
sequential (Sequential) (None, 180, 180, 3) 0
tf.__operators__.getitem_1 (None, 180, 180, 3) 0
(SlicingOpLambda)
tf.nn.bias_add_1 (TFOpLambd (None, 180, 180, 3) 0
a)
vgg16 (Functional) (None, None, None, 512) 14714688
flatten (Flatten) (None, 12800) 0
dense (Dense) (None, 256) 3277056
dropout (Dropout) (None, 256) 0
dense_1 (Dense) (None, 1) 257
=================================================================
Total params: 17,992,001
Trainable params: 3,277,313
Non-trainable params: 14,714,688
_________________________________________________________________
model.compile(loss="binary_crossentropy",
optimizer="rmsprop",
metrics=["accuracy"])
callbacks = [
keras.callbacks.ModelCheckpoint(
filepath="./models/feature_extraction_with_data_augmentation_1123.keras",
save_best_only=True,
monitor="val_loss")
]
history = model.fit(
train_dataset,
epochs=50,
validation_data=validation_dataset,
callbacks=callbacks)
Epoch 1/50 63/63 [==============================] - 293s 5s/step - loss: 21.3421 - accuracy: 0.9020 - val_loss: 8.7216 - val_accuracy: 0.9450 Epoch 2/50 63/63 [==============================] - 291s 5s/step - loss: 6.5155 - accuracy: 0.9480 - val_loss: 14.8055 - val_accuracy: 0.9170 Epoch 3/50 63/63 [==============================] - 288s 5s/step - loss: 6.9461 - accuracy: 0.9500 - val_loss: 7.3748 - val_accuracy: 0.9650 Epoch 4/50 63/63 [==============================] - 282s 5s/step - loss: 5.2459 - accuracy: 0.9610 - val_loss: 3.4148 - val_accuracy: 0.9740 Epoch 5/50 63/63 [==============================] - 295s 5s/step - loss: 4.5876 - accuracy: 0.9610 - val_loss: 8.6996 - val_accuracy: 0.9580 Epoch 6/50 63/63 [==============================] - 284s 5s/step - loss: 4.0565 - accuracy: 0.9670 - val_loss: 4.0778 - val_accuracy: 0.9730 Epoch 7/50 63/63 [==============================] - 284s 5s/step - loss: 3.7700 - accuracy: 0.9655 - val_loss: 3.6704 - val_accuracy: 0.9740 Epoch 8/50 63/63 [==============================] - 300s 5s/step - loss: 3.3243 - accuracy: 0.9690 - val_loss: 4.0507 - val_accuracy: 0.9780 Epoch 9/50 63/63 [==============================] - 381s 6s/step - loss: 2.8704 - accuracy: 0.9730 - val_loss: 2.7639 - val_accuracy: 0.9820 Epoch 10/50 63/63 [==============================] - 301s 5s/step - loss: 2.6955 - accuracy: 0.9755 - val_loss: 4.1958 - val_accuracy: 0.9810 Epoch 11/50 63/63 [==============================] - 363s 6s/step - loss: 1.8371 - accuracy: 0.9760 - val_loss: 3.2066 - val_accuracy: 0.9790 Epoch 12/50 63/63 [==============================] - 280s 4s/step - loss: 2.8242 - accuracy: 0.9730 - val_loss: 5.9767 - val_accuracy: 0.9660 Epoch 13/50 63/63 [==============================] - 270s 4s/step - loss: 1.6352 - accuracy: 0.9820 - val_loss: 2.1206 - val_accuracy: 0.9820 Epoch 14/50 63/63 [==============================] - 270s 4s/step - loss: 1.8645 - accuracy: 0.9795 - val_loss: 3.5659 - val_accuracy: 0.9780 Epoch 15/50 63/63 [==============================] - 272s 4s/step - loss: 2.3091 - accuracy: 0.9775 - val_loss: 8.7989 - val_accuracy: 0.9550 Epoch 16/50 63/63 [==============================] - 268s 4s/step - loss: 1.6426 - accuracy: 0.9825 - val_loss: 4.3752 - val_accuracy: 0.9690 Epoch 17/50 63/63 [==============================] - 260s 4s/step - loss: 1.4498 - accuracy: 0.9790 - val_loss: 2.4409 - val_accuracy: 0.9800 Epoch 18/50 63/63 [==============================] - 264s 4s/step - loss: 0.9696 - accuracy: 0.9855 - val_loss: 3.8909 - val_accuracy: 0.9750 Epoch 19/50 63/63 [==============================] - 264s 4s/step - loss: 1.5907 - accuracy: 0.9830 - val_loss: 3.4011 - val_accuracy: 0.9800 Epoch 20/50 63/63 [==============================] - 261s 4s/step - loss: 1.1950 - accuracy: 0.9870 - val_loss: 3.4109 - val_accuracy: 0.9800 Epoch 21/50 63/63 [==============================] - 285s 5s/step - loss: 0.9708 - accuracy: 0.9920 - val_loss: 2.7640 - val_accuracy: 0.9760 Epoch 22/50 63/63 [==============================] - 3372s 46s/step - loss: 1.1015 - accuracy: 0.9850 - val_loss: 2.6145 - val_accuracy: 0.9810 Epoch 23/50 63/63 [==============================] - 288s 5s/step - loss: 0.9966 - accuracy: 0.9855 - val_loss: 2.6974 - val_accuracy: 0.9790 Epoch 24/50 63/63 [==============================] - 287s 5s/step - loss: 1.1550 - accuracy: 0.9855 - val_loss: 4.0739 - val_accuracy: 0.9750 Epoch 25/50 63/63 [==============================] - 290s 5s/step - loss: 0.6695 - accuracy: 0.9900 - val_loss: 2.2523 - val_accuracy: 0.9810 Epoch 26/50 63/63 [==============================] - 296s 5s/step - loss: 0.8749 - accuracy: 0.9880 - val_loss: 3.6911 - val_accuracy: 0.9750 Epoch 27/50 63/63 [==============================] - 294s 5s/step - loss: 0.8263 - accuracy: 0.9870 - val_loss: 2.5182 - val_accuracy: 0.9770 Epoch 28/50 63/63 [==============================] - 387s 6s/step - loss: 0.9241 - accuracy: 0.9850 - val_loss: 2.9380 - val_accuracy: 0.9800 Epoch 29/50 63/63 [==============================] - 260s 4s/step - loss: 0.9744 - accuracy: 0.9870 - val_loss: 3.4770 - val_accuracy: 0.9720 Epoch 30/50 63/63 [==============================] - 287s 5s/step - loss: 0.6125 - accuracy: 0.9895 - val_loss: 2.7450 - val_accuracy: 0.9740 Epoch 31/50 63/63 [==============================] - 299s 5s/step - loss: 0.8625 - accuracy: 0.9860 - val_loss: 3.1449 - val_accuracy: 0.9780 Epoch 32/50 63/63 [==============================] - 296s 5s/step - loss: 0.6221 - accuracy: 0.9880 - val_loss: 4.3230 - val_accuracy: 0.9690 Epoch 33/50 63/63 [==============================] - 359s 6s/step - loss: 0.9577 - accuracy: 0.9835 - val_loss: 3.0792 - val_accuracy: 0.9780 Epoch 34/50 63/63 [==============================] - 487s 8s/step - loss: 1.0503 - accuracy: 0.9815 - val_loss: 3.7072 - val_accuracy: 0.9740 Epoch 35/50 63/63 [==============================] - 282s 4s/step - loss: 0.6589 - accuracy: 0.9885 - val_loss: 2.7751 - val_accuracy: 0.9780 Epoch 36/50 63/63 [==============================] - 275s 4s/step - loss: 0.6084 - accuracy: 0.9860 - val_loss: 2.9246 - val_accuracy: 0.9720 Epoch 37/50 63/63 [==============================] - 276s 4s/step - loss: 0.7831 - accuracy: 0.9865 - val_loss: 3.6564 - val_accuracy: 0.9720 Epoch 38/50 63/63 [==============================] - 278s 4s/step - loss: 0.7711 - accuracy: 0.9860 - val_loss: 2.6351 - val_accuracy: 0.9800 Epoch 39/50 63/63 [==============================] - 274s 4s/step - loss: 0.7031 - accuracy: 0.9845 - val_loss: 2.3187 - val_accuracy: 0.9790 Epoch 40/50 63/63 [==============================] - 281s 4s/step - loss: 0.7030 - accuracy: 0.9885 - val_loss: 2.1637 - val_accuracy: 0.9810 Epoch 41/50 63/63 [==============================] - 286s 5s/step - loss: 0.7605 - accuracy: 0.9870 - val_loss: 2.5422 - val_accuracy: 0.9790 Epoch 42/50 63/63 [==============================] - 275s 4s/step - loss: 0.6412 - accuracy: 0.9870 - val_loss: 3.5225 - val_accuracy: 0.9730 Epoch 43/50 63/63 [==============================] - 283s 5s/step - loss: 0.6751 - accuracy: 0.9895 - val_loss: 2.4380 - val_accuracy: 0.9780 Epoch 44/50 63/63 [==============================] - 271s 4s/step - loss: 0.6162 - accuracy: 0.9875 - val_loss: 2.2258 - val_accuracy: 0.9800 Epoch 45/50 63/63 [==============================] - 268s 4s/step - loss: 0.5465 - accuracy: 0.9890 - val_loss: 2.6444 - val_accuracy: 0.9780 Epoch 46/50 63/63 [==============================] - 317s 5s/step - loss: 0.4733 - accuracy: 0.9920 - val_loss: 2.6297 - val_accuracy: 0.9780 Epoch 47/50 63/63 [==============================] - 267s 4s/step - loss: 0.6684 - accuracy: 0.9860 - val_loss: 2.9017 - val_accuracy: 0.9730 Epoch 48/50 63/63 [==============================] - 264s 4s/step - loss: 0.6128 - accuracy: 0.9895 - val_loss: 4.5101 - val_accuracy: 0.9700 Epoch 49/50 63/63 [==============================] - 259s 4s/step - loss: 0.8340 - accuracy: 0.9850 - val_loss: 2.2681 - val_accuracy: 0.9780 Epoch 50/50 63/63 [==============================] - 268s 4s/step - loss: 0.5233 - accuracy: 0.9910 - val_loss: 2.1408 - val_accuracy: 0.9740
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, "bo", label="Training accuracy")
plt.plot(epochs, val_acc, "b", label="Validation accuracy")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
Evaluating the model on the test set
test_model = keras.models.load_model(
"./models/feature_extraction_with_data_augmentation_1123.keras")
test_loss, test_acc = test_model.evaluate(test_dataset)
print(f"Test accuracy: {test_acc:.3f}")
63/63 [==============================] - 186s 3s/step - loss: 3.2970 - accuracy: 0.9780 Test accuracy: 0.978
Freezing all layers until the fourth from the last
conv_base.trainable = True
for layer in conv_base.layers[:-4]:
layer.trainable = False
model.summary()
Model: "model_3"
_________________________________________________________________
Layer (type) Output Shape Param #
=================================================================
input_5 (InputLayer) [(None, 180, 180, 3)] 0
sequential_2 (Sequential) (None, 180, 180, 3) 0
tf.__operators__.getitem (S (None, 180, 180, 3) 0
licingOpLambda)
tf.nn.bias_add (TFOpLambda) (None, 180, 180, 3) 0
vgg16 (Functional) (None, 5, 5, 512) 14714688
flatten_4 (Flatten) (None, 12800) 0
dense_6 (Dense) (None, 256) 3277056
dropout_2 (Dropout) (None, 256) 0
dense_7 (Dense) (None, 1) 257
=================================================================
Total params: 17,992,001
Trainable params: 10,356,737
Non-trainable params: 7,635,264
_________________________________________________________________
Fine-tuning the model
model.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.RMSprop(learning_rate=1e-5),
metrics=["accuracy"])
callbacks = [
keras.callbacks.ModelCheckpoint(
filepath="./models/fine_tuning_1123.keras",
save_best_only=True,
monitor="val_loss")
]
history = model.fit(
train_dataset,
epochs=30,
validation_data=validation_dataset,
callbacks=callbacks)
Epoch 1/30 63/63 [==============================] - 313s 5s/step - loss: 0.2381 - accuracy: 0.9905 - val_loss: 1.3471 - val_accuracy: 0.9850 Epoch 2/30 63/63 [==============================] - 303s 5s/step - loss: 0.2744 - accuracy: 0.9940 - val_loss: 1.4860 - val_accuracy: 0.9830 Epoch 3/30 63/63 [==============================] - 309s 5s/step - loss: 0.4824 - accuracy: 0.9915 - val_loss: 1.4627 - val_accuracy: 0.9830 Epoch 4/30 63/63 [==============================] - 299s 5s/step - loss: 0.4817 - accuracy: 0.9895 - val_loss: 1.5790 - val_accuracy: 0.9810 Epoch 5/30 63/63 [==============================] - 298s 5s/step - loss: 0.2347 - accuracy: 0.9945 - val_loss: 1.7145 - val_accuracy: 0.9810 Epoch 6/30 63/63 [==============================] - 289s 5s/step - loss: 0.1841 - accuracy: 0.9945 - val_loss: 1.7989 - val_accuracy: 0.9800 Epoch 7/30 63/63 [==============================] - 292s 5s/step - loss: 0.4181 - accuracy: 0.9885 - val_loss: 1.4607 - val_accuracy: 0.9800 Epoch 8/30 63/63 [==============================] - 301s 5s/step - loss: 0.2297 - accuracy: 0.9925 - val_loss: 1.6569 - val_accuracy: 0.9760 Epoch 9/30 63/63 [==============================] - 308s 5s/step - loss: 0.3230 - accuracy: 0.9925 - val_loss: 1.6711 - val_accuracy: 0.9820 Epoch 10/30 63/63 [==============================] - 309s 5s/step - loss: 0.1040 - accuracy: 0.9935 - val_loss: 1.4894 - val_accuracy: 0.9830 Epoch 11/30 63/63 [==============================] - 298s 5s/step - loss: 0.1207 - accuracy: 0.9950 - val_loss: 1.5354 - val_accuracy: 0.9800 Epoch 12/30 63/63 [==============================] - 306s 5s/step - loss: 0.1992 - accuracy: 0.9940 - val_loss: 1.2880 - val_accuracy: 0.9820 Epoch 13/30 63/63 [==============================] - 306s 5s/step - loss: 0.1631 - accuracy: 0.9930 - val_loss: 1.9994 - val_accuracy: 0.9800 Epoch 14/30 63/63 [==============================] - 297s 5s/step - loss: 0.2170 - accuracy: 0.9940 - val_loss: 1.3193 - val_accuracy: 0.9830 Epoch 15/30 63/63 [==============================] - 289s 5s/step - loss: 0.1370 - accuracy: 0.9960 - val_loss: 2.0114 - val_accuracy: 0.9760 Epoch 16/30 63/63 [==============================] - 287s 5s/step - loss: 0.3034 - accuracy: 0.9920 - val_loss: 1.2102 - val_accuracy: 0.9830 Epoch 17/30 63/63 [==============================] - 289s 5s/step - loss: 0.2454 - accuracy: 0.9920 - val_loss: 3.1999 - val_accuracy: 0.9690 Epoch 18/30 63/63 [==============================] - 288s 5s/step - loss: 0.1546 - accuracy: 0.9940 - val_loss: 1.5309 - val_accuracy: 0.9830 Epoch 19/30 63/63 [==============================] - 287s 5s/step - loss: 0.1475 - accuracy: 0.9960 - val_loss: 1.3936 - val_accuracy: 0.9830 Epoch 20/30 63/63 [==============================] - 288s 5s/step - loss: 0.3364 - accuracy: 0.9930 - val_loss: 1.4716 - val_accuracy: 0.9820 Epoch 21/30 63/63 [==============================] - 289s 5s/step - loss: 0.1291 - accuracy: 0.9960 - val_loss: 1.4022 - val_accuracy: 0.9810 Epoch 22/30 63/63 [==============================] - 288s 5s/step - loss: 0.0954 - accuracy: 0.9950 - val_loss: 1.7019 - val_accuracy: 0.9790 Epoch 23/30 63/63 [==============================] - 289s 5s/step - loss: 0.1584 - accuracy: 0.9950 - val_loss: 1.0620 - val_accuracy: 0.9870 Epoch 24/30 63/63 [==============================] - 291s 5s/step - loss: 0.1124 - accuracy: 0.9975 - val_loss: 1.1888 - val_accuracy: 0.9840 Epoch 25/30 63/63 [==============================] - 288s 5s/step - loss: 0.0632 - accuracy: 0.9965 - val_loss: 1.4640 - val_accuracy: 0.9820 Epoch 26/30 63/63 [==============================] - 288s 5s/step - loss: 0.1423 - accuracy: 0.9960 - val_loss: 1.2402 - val_accuracy: 0.9860 Epoch 27/30 63/63 [==============================] - 289s 5s/step - loss: 0.0658 - accuracy: 0.9965 - val_loss: 1.2593 - val_accuracy: 0.9850 Epoch 28/30 63/63 [==============================] - 310s 5s/step - loss: 0.0611 - accuracy: 0.9975 - val_loss: 1.3234 - val_accuracy: 0.9830 Epoch 29/30 63/63 [==============================] - 313s 5s/step - loss: 0.2037 - accuracy: 0.9925 - val_loss: 1.1816 - val_accuracy: 0.9820 Epoch 30/30 63/63 [==============================] - 298s 5s/step - loss: 0.0813 - accuracy: 0.9965 - val_loss: 1.2378 - val_accuracy: 0.9840
acc = history.history["accuracy"]
val_acc = history.history["val_accuracy"]
loss = history.history["loss"]
val_loss = history.history["val_loss"]
epochs = range(1, len(acc) + 1)
plt.plot(epochs, acc, "bo", label="Training accuracy")
plt.plot(epochs, val_acc, "b", label="Validation accuracy")
plt.title("Training and validation accuracy")
plt.legend()
plt.figure()
plt.plot(epochs, loss, "bo", label="Training loss")
plt.plot(epochs, val_loss, "b", label="Validation loss")
plt.title("Training and validation loss")
plt.legend()
plt.show()
model = keras.models.load_model("./models/fine_tuning_1123.keras")
test_loss, test_acc = model.evaluate(test_dataset)
print(f"Test accuracy: {test_acc:.3f}")
63/63 [==============================] - 194s 3s/step - loss: 1.6192 - accuracy: 0.9770 Test accuracy: 0.977
I fine-tuned a pre-trained model by freezing all layers except for the top four. However, the test accuracy remains unchanged compared to freezing all layers. As a next step, I will try freezing all layers except for the top six and re-evaluate the test accuracy. See below:
conv_base.trainable = True
for layer in conv_base.layers[:-6]:
layer.trainable = False
model.compile(loss="binary_crossentropy",
optimizer=keras.optimizers.RMSprop(learning_rate=1e-5),
metrics=["accuracy"])
callbacks = [
keras.callbacks.ModelCheckpoint(
filepath="./models/fine_tuning_1123_2.keras",
save_best_only=True,
monitor="val_loss")
]
history = model.fit(
train_dataset,
epochs=30,
validation_data=validation_dataset,
callbacks=callbacks)
Epoch 1/30 63/63 [==============================] - 354s 6s/step - loss: 0.1536 - accuracy: 0.9955 - val_loss: 1.3658 - val_accuracy: 0.9830 Epoch 2/30 63/63 [==============================] - 300s 5s/step - loss: 0.1678 - accuracy: 0.9970 - val_loss: 1.4949 - val_accuracy: 0.9870 Epoch 3/30 63/63 [==============================] - 296s 5s/step - loss: 0.0741 - accuracy: 0.9980 - val_loss: 1.2615 - val_accuracy: 0.9870 Epoch 4/30 63/63 [==============================] - 290s 5s/step - loss: 0.0888 - accuracy: 0.9965 - val_loss: 1.2293 - val_accuracy: 0.9880 Epoch 5/30 63/63 [==============================] - 287s 5s/step - loss: 0.0492 - accuracy: 0.9945 - val_loss: 1.2901 - val_accuracy: 0.9860 Epoch 6/30 63/63 [==============================] - 287s 5s/step - loss: 0.1234 - accuracy: 0.9960 - val_loss: 1.4976 - val_accuracy: 0.9830 Epoch 7/30 63/63 [==============================] - 288s 5s/step - loss: 0.0607 - accuracy: 0.9980 - val_loss: 1.3523 - val_accuracy: 0.9850 Epoch 8/30 63/63 [==============================] - 288s 5s/step - loss: 0.1662 - accuracy: 0.9965 - val_loss: 1.2091 - val_accuracy: 0.9860 Epoch 9/30 63/63 [==============================] - 287s 5s/step - loss: 0.2118 - accuracy: 0.9955 - val_loss: 1.7535 - val_accuracy: 0.9790 Epoch 10/30 63/63 [==============================] - 289s 5s/step - loss: 0.0406 - accuracy: 0.9985 - val_loss: 1.4633 - val_accuracy: 0.9830 Epoch 11/30 63/63 [==============================] - 296s 5s/step - loss: 0.0223 - accuracy: 0.9975 - val_loss: 1.6206 - val_accuracy: 0.9800 Epoch 12/30 63/63 [==============================] - 310s 5s/step - loss: 0.0775 - accuracy: 0.9975 - val_loss: 1.4389 - val_accuracy: 0.9800 Epoch 13/30 63/63 [==============================] - 311s 5s/step - loss: 0.0044 - accuracy: 0.9990 - val_loss: 1.4765 - val_accuracy: 0.9810 Epoch 14/30 63/63 [==============================] - 310s 5s/step - loss: 0.0957 - accuracy: 0.9975 - val_loss: 2.1506 - val_accuracy: 0.9750 Epoch 15/30 63/63 [==============================] - 309s 5s/step - loss: 0.0241 - accuracy: 0.9985 - val_loss: 1.6471 - val_accuracy: 0.9800 Epoch 16/30 63/63 [==============================] - 332s 5s/step - loss: 0.0161 - accuracy: 0.9995 - val_loss: 1.1977 - val_accuracy: 0.9850 Epoch 17/30 63/63 [==============================] - 317s 5s/step - loss: 0.0803 - accuracy: 0.9950 - val_loss: 1.3282 - val_accuracy: 0.9860 Epoch 18/30 63/63 [==============================] - 322s 5s/step - loss: 0.1062 - accuracy: 0.9965 - val_loss: 1.5808 - val_accuracy: 0.9810 Epoch 19/30 63/63 [==============================] - 351s 6s/step - loss: 0.0288 - accuracy: 0.9985 - val_loss: 1.3823 - val_accuracy: 0.9850 Epoch 20/30 63/63 [==============================] - 314s 5s/step - loss: 0.0577 - accuracy: 0.9975 - val_loss: 1.5336 - val_accuracy: 0.9850 Epoch 21/30 63/63 [==============================] - 309s 5s/step - loss: 0.0122 - accuracy: 0.9985 - val_loss: 1.4148 - val_accuracy: 0.9840 Epoch 22/30 63/63 [==============================] - 313s 5s/step - loss: 0.0833 - accuracy: 0.9970 - val_loss: 1.3208 - val_accuracy: 0.9830 Epoch 23/30 63/63 [==============================] - 312s 5s/step - loss: 0.0673 - accuracy: 0.9980 - val_loss: 1.3729 - val_accuracy: 0.9860 Epoch 24/30 63/63 [==============================] - 312s 5s/step - loss: 0.0455 - accuracy: 0.9980 - val_loss: 1.3756 - val_accuracy: 0.9860 Epoch 25/30 63/63 [==============================] - 311s 5s/step - loss: 0.0126 - accuracy: 0.9990 - val_loss: 1.3199 - val_accuracy: 0.9860 Epoch 26/30 63/63 [==============================] - 289s 5s/step - loss: 0.1710 - accuracy: 0.9960 - val_loss: 1.3259 - val_accuracy: 0.9830 Epoch 27/30 63/63 [==============================] - 287s 5s/step - loss: 0.0391 - accuracy: 0.9975 - val_loss: 1.8287 - val_accuracy: 0.9800 Epoch 28/30 63/63 [==============================] - 287s 5s/step - loss: 0.0669 - accuracy: 0.9970 - val_loss: 1.5032 - val_accuracy: 0.9830 Epoch 29/30 63/63 [==============================] - 289s 5s/step - loss: 0.0417 - accuracy: 0.9990 - val_loss: 1.2371 - val_accuracy: 0.9840 Epoch 30/30 63/63 [==============================] - 288s 5s/step - loss: 0.0406 - accuracy: 0.9990 - val_loss: 1.4071 - val_accuracy: 0.9820
model = keras.models.load_model("./models/fine_tuning_1123_2.keras")
test_loss, test_acc = model.evaluate(test_dataset)
print(f"Test accuracy: {test_acc:.3f}")
63/63 [==============================] - 190s 3s/step - loss: 1.5891 - accuracy: 0.9815 Test accuracy: 0.982
Test accuracy is 0.982, which is an increase compared to fine-tuning the top 4 layers(0.977).
import tensorflow as tf
import matplotlib.pyplot as plt
# Dictionary of models and their evaluation method
models = {
'ConvNet From Scratch': {
'path': './models/convnet_from_scratch_1123.keras',
'use_features': False
},
'Feature Extraction': {
'path': './models/feature_extraction_1123.keras',
'use_features': True
},
'Feature Extraction with Augmentation': {
'path': './models/feature_extraction_with_data_augmentation_1123.keras',
'use_features': False
},
'Fine Tuning': {
'path': './models/fine_tuning_1123.keras',
'use_features': False
},
'Fine Tuning 2': {
'path': './models/fine_tuning_1123_2.keras',
'use_features': False
}
}
# Evaluate models and store accuracies
accuracies = {}
for name, info in models.items():
model = keras.models.load_model(info['path'])
if info['use_features']:
test_loss, test_acc = model.evaluate(x=test_features, y=test_labels)
else:
test_loss, test_acc = model.evaluate(test_dataset)
accuracies[name] = test_acc
print(f"{name} - Test accuracy: {test_acc:.3f}")
# Create bar plot
plt.figure(figsize=(12, 6))
names = list(accuracies.keys())
acc_values = list(accuracies.values())
bars = plt.bar(range(len(names)), acc_values)
plt.title('Model Accuracy Comparison')
plt.ylabel('Accuracy')
plt.xlabel('Models')
plt.ylim(0, 1)
# Rotate x-axis labels for better readability
plt.xticks(range(len(names)), names, rotation=45, ha='right')
# Add value labels on top of bars
for i, bar in enumerate(bars):
plt.text(bar.get_x() + bar.get_width()/2,
bar.get_height() + 0.01,
f'{acc_values[i]:.3f}',
ha='center')
plt.tight_layout()
plt.show()
63/63 [==============================] - 16s 246ms/step - loss: 0.5654 - accuracy: 0.7130 ConvNet From Scratch - Test accuracy: 0.713 63/63 [==============================] - 0s 5ms/step - loss: 4.9291 - accuracy: 0.9715 Feature Extraction - Test accuracy: 0.971 63/63 [==============================] - 199s 3s/step - loss: 2.0451 - accuracy: 0.9770 Feature Extraction with Augmentation - Test accuracy: 0.977 63/63 [==============================] - 200s 3s/step - loss: 1.6192 - accuracy: 0.9770 Fine Tuning - Test accuracy: 0.977 63/63 [==============================] - 246s 4s/step - loss: 1.5891 - accuracy: 0.9815 Fine Tuning 2 - Test accuracy: 0.982
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.metrics import confusion_matrix
def plot_confusion_matrix(model, name, use_features):
# Get predictions using evaluate
if use_features:
predictions = model.predict(test_features)
loss, accuracy = model.evaluate(x=test_features, y=test_labels)
true_labels = test_labels
else:
predictions = model.predict(test_dataset)
loss, accuracy = model.evaluate(test_dataset)
true_labels = np.concatenate([y for x, y in test_dataset])
# Convert predictions to binary
pred_labels = (predictions > 0.5).astype(int)
# Calculate confusion matrix
cm = confusion_matrix(true_labels, pred_labels)
# Plot confusion matrix
plt.figure(figsize=(8, 6))
sns.heatmap(cm, annot=True, fmt='d', cmap='Blues')
plt.title(f'Confusion Matrix - {name}\nAccuracy: {accuracy:.3f}')
plt.ylabel('True Label')
plt.xlabel('Predicted Label')
plt.show()
# Dictionary of models
models = {
"ConvNet From Scratch": {
"path": "./models/convnet_from_scratch_1123.keras",
"use_features": False
},
"Feature Extraction": {
"path": "./models/feature_extraction_1123.keras",
"use_features": True
},
"Feature Extraction with Augmentation": {
"path": "./models/feature_extraction_with_data_augmentation_1123.keras",
"use_features": False
},
"Fine Tuning": {
"path": "./models/fine_tuning_1123.keras",
"use_features": False
},
"Fine Tuning 2": {
"path": "./models/fine_tuning_1123_2.keras",
"use_features": False
}
}
# Plot confusion matrix for each model
for name, info in models.items():
print(f"\nProcessing {name}...")
model = keras.models.load_model(info["path"])
plot_confusion_matrix(model, name, info["use_features"])
Processing ConvNet From Scratch... 63/63 [==============================] - 16s 238ms/step 63/63 [==============================] - 15s 236ms/step - loss: 0.5654 - accuracy: 0.7130
Processing Feature Extraction... 63/63 [==============================] - 0s 5ms/step 63/63 [==============================] - 0s 4ms/step - loss: 4.9291 - accuracy: 0.9715
Processing Feature Extraction with Augmentation... 63/63 [==============================] - 190s 3s/step 63/63 [==============================] - 189s 3s/step - loss: 3.2970 - accuracy: 0.9780
Processing Fine Tuning... 63/63 [==============================] - 190s 3s/step 63/63 [==============================] - 193s 3s/step - loss: 1.6192 - accuracy: 0.9770
Processing Fine Tuning 2... 63/63 [==============================] - 194s 3s/step 63/63 [==============================] - 201s 3s/step - loss: 1.5891 - accuracy: 0.9815
from sklearn.metrics import classification_report, precision_recall_curve
def calculate_metrics(model, name, use_features):
# Get predictions
if use_features:
predictions = model.predict(test_features)
loss, accuracy = model.evaluate(x=test_features, y=test_labels)
true_labels = test_labels
else:
predictions = model.predict(test_dataset)
loss, accuracy = model.evaluate(test_dataset)
true_labels = np.concatenate([y for x, y in test_dataset])
# Convert predictions to binary
pred_labels = (predictions > 0.5).astype(int)
# Print classification report
print(f"\nClassification Report for {name}:")
print(f"Accuracy: {accuracy:.3f}")
print("-" * 60)
print(classification_report(true_labels, pred_labels,
target_names=['Cats', 'Dogs']))
# Use the same models dictionary from your code
for name, info in models.items():
print(f"\nProcessing {name}...")
model = keras.models.load_model(info["path"])
calculate_metrics(model, name, info["use_features"])
Processing ConvNet From Scratch...
63/63 [==============================] - 18s 264ms/step
63/63 [==============================] - 17s 268ms/step - loss: 0.5654 - accuracy: 0.7130
Classification Report for ConvNet From Scratch:
Accuracy: 0.713
------------------------------------------------------------
precision recall f1-score support
Cats 0.51 0.50 0.51 1000
Dogs 0.51 0.51 0.51 1000
accuracy 0.51 2000
macro avg 0.51 0.51 0.51 2000
weighted avg 0.51 0.51 0.51 2000
Processing Feature Extraction...
63/63 [==============================] - 0s 5ms/step
63/63 [==============================] - 0s 5ms/step - loss: 4.9291 - accuracy: 0.9715
Classification Report for Feature Extraction:
Accuracy: 0.971
------------------------------------------------------------
precision recall f1-score support
Cats 0.96 0.98 0.97 1000
Dogs 0.98 0.96 0.97 1000
accuracy 0.97 2000
macro avg 0.97 0.97 0.97 2000
weighted avg 0.97 0.97 0.97 2000
Processing Feature Extraction with Augmentation...
63/63 [==============================] - 200s 3s/step
63/63 [==============================] - 183s 3s/step - loss: 3.2970 - accuracy: 0.9780
Classification Report for Feature Extraction with Augmentation:
Accuracy: 0.978
------------------------------------------------------------
precision recall f1-score support
Cats 0.50 0.52 0.51 1000
Dogs 0.51 0.49 0.50 1000
accuracy 0.51 2000
macro avg 0.51 0.51 0.50 2000
weighted avg 0.51 0.51 0.50 2000
Processing Fine Tuning...
63/63 [==============================] - 189s 3s/step
63/63 [==============================] - 199s 3s/step - loss: 1.6192 - accuracy: 0.9770
Classification Report for Fine Tuning:
Accuracy: 0.977
------------------------------------------------------------
precision recall f1-score support
Cats 0.53 0.54 0.53 1000
Dogs 0.53 0.53 0.53 1000
accuracy 0.53 2000
macro avg 0.53 0.53 0.53 2000
weighted avg 0.53 0.53 0.53 2000
Processing Fine Tuning 2...
63/63 [==============================] - 198s 3s/step
63/63 [==============================] - 194s 3s/step - loss: 1.5891 - accuracy: 0.9815
Classification Report for Fine Tuning 2:
Accuracy: 0.982
------------------------------------------------------------
precision recall f1-score support
Cats 0.48 0.48 0.48 1000
Dogs 0.48 0.48 0.48 1000
accuracy 0.48 2000
macro avg 0.48 0.48 0.48 2000
weighted avg 0.48 0.48 0.48 2000
import matplotlib.pyplot as plt
from sklearn.metrics import precision_recall_curve
def plot_precision_recall_curves(models):
plt.figure(figsize=(10, 8))
for name, info in models.items():
# Load model
model = keras.models.load_model(info["path"])
# Get predictions
if info["use_features"]:
predictions = model.predict(test_features)
true_labels = test_labels
else:
predictions = model.predict(test_dataset)
true_labels = np.concatenate([y for x, y in test_dataset])
# Calculate precision-recall curve
precision, recall, _ = precision_recall_curve(true_labels, predictions)
# Plot curve for this model
plt.plot(recall, precision, label=name)
# Customize plot
plt.xlabel('Recall')
plt.ylabel('Precision')
plt.title('Precision-Recall Curves')
plt.legend(bbox_to_anchor=(1.05, 1), loc='upper left')
plt.grid(True)
plt.tight_layout()
plt.show()
# Use the same models dictionary and call the function
plot_precision_recall_curves(models)
63/63 [==============================] - 18s 274ms/step 63/63 [==============================] - 1s 7ms/step 63/63 [==============================] - 197s 3s/step 63/63 [==============================] - 202s 3s/step 63/63 [==============================] - 193s 3s/step
import matplotlib.pyplot as plt
def show_misclassified_examples(model, dataset, use_features=False, num_examples=5):
"""
Show examples where the model made incorrect predictions
Args:
model: The trained model
dataset: Test dataset
use_features: Whether the model uses features or raw images
num_examples: Number of misclassified examples to show
"""
if use_features:
predictions = model.predict(test_features)
true_labels = test_labels
print("Cannot show images for feature extraction model")
return
# Get predictions
all_images = []
all_labels = []
predictions = []
# Collect images and predictions
for images, labels in dataset:
preds = model.predict(images)
all_images.extend(images.numpy())
all_labels.extend(labels.numpy())
predictions.extend(preds)
all_images = np.array(all_images)
all_labels = np.array(all_labels)
predictions = np.array(predictions)
pred_labels = (predictions > 0.5).astype(int)
# Find misclassified examples
misclassified_idx = np.where(pred_labels.flatten() != all_labels)[0]
if len(misclassified_idx) == 0:
print("No misclassified examples found!")
return
# Show the misclassified examples
num_examples = min(num_examples, len(misclassified_idx))
plt.figure(figsize=(15, 3))
for i in range(num_examples):
idx = misclassified_idx[i]
plt.subplot(1, num_examples, i + 1)
plt.imshow(all_images[idx].astype("uint8"))
plt.axis('off')
true_label = "Cat" if all_labels[idx] == 0 else "Dog"
pred_label = "Cat" if pred_labels[idx] == 0 else "Dog"
confidence = abs(predictions[idx][0] - 0.5) * 2 # Convert to percentage
plt.title(f'True: {true_label}\nPred: {pred_label}\nConf: {confidence:.2%}',
color='red' if true_label != pred_label else 'black')
plt.tight_layout()
plt.show()
# Dictionary of models
models = {
'ConvNet From Scratch': {
'path': './models/convnet_from_scratch_1123.keras',
'use_features': False
},
'Feature Extraction with Augmentation': {
'path': './models/feature_extraction_with_data_augmentation_1123.keras',
'use_features': False
},
'Fine Tuning': {
'path': './models/fine_tuning_1123.keras',
'use_features': False
},
'Fine Tuning 2': {
'path': './models/fine_tuning_1123_2.keras',
'use_features': False
}
}
# Show misclassified examples for each model
for name, info in models.items():
print(f"\n=== Misclassified Examples for {name} ===")
model = keras.models.load_model(info['path'])
show_misclassified_examples(model, test_dataset, info['use_features'])
=== Misclassified Examples for ConvNet From Scratch === 1/1 [==============================] - 1s 717ms/step 1/1 [==============================] - 0s 353ms/step 1/1 [==============================] - 0s 363ms/step 1/1 [==============================] - 0s 414ms/step 1/1 [==============================] - 0s 352ms/step 1/1 [==============================] - 0s 370ms/step 1/1 [==============================] - 0s 428ms/step 1/1 [==============================] - 0s 370ms/step 1/1 [==============================] - 0s 378ms/step 1/1 [==============================] - 0s 336ms/step 1/1 [==============================] - 0s 355ms/step 1/1 [==============================] - 0s 384ms/step 1/1 [==============================] - 0s 457ms/step 1/1 [==============================] - 0s 401ms/step 1/1 [==============================] - 0s 338ms/step 1/1 [==============================] - 0s 341ms/step 1/1 [==============================] - 0s 348ms/step 1/1 [==============================] - 0s 389ms/step 1/1 [==============================] - 0s 365ms/step 1/1 [==============================] - 0s 305ms/step 1/1 [==============================] - 0s 359ms/step 1/1 [==============================] - 0s 386ms/step 1/1 [==============================] - 0s 354ms/step 1/1 [==============================] - 0s 294ms/step 1/1 [==============================] - 0s 313ms/step 1/1 [==============================] - 0s 316ms/step 1/1 [==============================] - 0s 312ms/step 1/1 [==============================] - 0s 333ms/step 1/1 [==============================] - 0s 323ms/step 1/1 [==============================] - 0s 320ms/step 1/1 [==============================] - 0s 299ms/step 1/1 [==============================] - 0s 322ms/step 1/1 [==============================] - 0s 284ms/step 1/1 [==============================] - 0s 299ms/step 1/1 [==============================] - 0s 302ms/step 1/1 [==============================] - 0s 299ms/step 1/1 [==============================] - 0s 311ms/step 1/1 [==============================] - 0s 300ms/step 1/1 [==============================] - 0s 314ms/step 1/1 [==============================] - 0s 326ms/step 1/1 [==============================] - 0s 306ms/step 1/1 [==============================] - 0s 335ms/step 1/1 [==============================] - 0s 287ms/step 1/1 [==============================] - 0s 301ms/step 1/1 [==============================] - 0s 310ms/step 1/1 [==============================] - 0s 314ms/step 1/1 [==============================] - 0s 300ms/step 1/1 [==============================] - 0s 313ms/step 1/1 [==============================] - 0s 300ms/step 1/1 [==============================] - 0s 286ms/step 1/1 [==============================] - 0s 323ms/step 1/1 [==============================] - 0s 299ms/step 1/1 [==============================] - 0s 305ms/step 1/1 [==============================] - 0s 298ms/step 1/1 [==============================] - 0s 297ms/step 1/1 [==============================] - 0s 303ms/step 1/1 [==============================] - 0s 299ms/step 1/1 [==============================] - 0s 302ms/step 1/1 [==============================] - 0s 296ms/step 1/1 [==============================] - 0s 306ms/step 1/1 [==============================] - 0s 311ms/step 1/1 [==============================] - 0s 322ms/step 1/1 [==============================] - 0s 268ms/step
=== Misclassified Examples for Feature Extraction with Augmentation === 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 5s 5s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 2s 2s/step
=== Misclassified Examples for Fine Tuning === 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 2s 2s/step
=== Misclassified Examples for Fine Tuning 2 === 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 5s 5s/step 1/1 [==============================] - 5s 5s/step 1/1 [==============================] - 8s 8s/step 1/1 [==============================] - 6s 6s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 4s 4s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 3s 3s/step 1/1 [==============================] - 2s 2s/step
Fine-tuning_2 model(freezing all layers until last 6 layers) performs the best because it effectively combines VGG16's pre-trained weights with appropriate fine-tuning of later layers, achieving the highest accuracy in cats and dogs classification. The accuracy of Fine-tuning_2 model is 0.982, our data is balanced data, so accuracy should be a good way to evaluate model.
Additionally, the feature extraction model is also performing well, as VGG16's pre-trained features are good at distinguishing between cats and dogs. Although the models perform well overall, they still struggle with some cases, such as unusual angles or poses, non-typical breed characteristics, image composition, and background distractions.